# Load the labelled product-review dataset; expected columns: 'comment'
# (free-text review) and 'label' (integer class id — the sample output
# shows 1, and the model below has a 3-unit softmax head, so labels are
# presumably in {0, 1, 2} — TODO confirm against the CSV).
import pandas as pd
import numpy as np
df = pd.read_csv("sentiment_product_reviews.csv")
df.head(2)
comment label
0 Moderate performance, works as intended. 1
1 The product is just okay, nothing special. 1
# Build the English stop-word set, but keep the negation words
# ("no", "not", "never") — removing them would flip the polarity of
# sentences like "not good" during sentiment cleaning.
# NOTE(review): this rebinds the name `stopwords` from the nltk module
# to a plain set; later blocks rely on that set, so the name is kept.
from nltk.corpus import stopwords
import nltk

nltk.download('stopwords')
stopwords = set(stopwords.words('english')) - {"no", "not", "never"}
[nltk_data] Downloading package stopwords to
[nltk_data]     C:\Users\User\AppData\Roaming\nltk_data...
[nltk_data]   Package stopwords is already up-to-date!
import re
from nltk import ngrams
from nltk.stem import WordNetLemmatizer

lm = WordNetLemmatizer()


def tokenization_review(text):
    """Normalize one review string for modelling.

    Strips characters other than alphanumerics, whitespace and basic
    punctuation, lowercases, removes stop words, lemmatizes each token,
    and returns the result re-joined with single spaces.
    """
    # NOTE(review): punctuation stays attached to tokens ("okay,"), so
    # such tokens bypass the stop-word check and the lemmatizer lookup —
    # confirm this is intended.
    cleaned = re.sub(r'[^a-zA-Z0-9\s.,!?]', ' ', str(text)).lower()
    kept = (lm.lemmatize(tok) for tok in cleaned.split() if tok not in stopwords)
    return ' '.join(kept)


df['comment_cleaned'] = df['comment'].apply(tokenization_review)
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Fit the word-level tokenizer on the cleaned training text only.
# BUG FIX: reserve an out-of-vocabulary token so that unseen words at
# inference time map to a known index instead of being silently dropped.
# (Previously no oov_token was set, which is what later forced a second
# fit_on_texts call on the test sentence — a data leak that also produced
# indices outside the embedding layer's vocabulary.)
tokenizer = Tokenizer(char_level=False, oov_token="<OOV>")
tokenizer.fit_on_texts(df['comment_cleaned'])
df["seq_comment"] = tokenizer.texts_to_sequences(df.comment_cleaned)
df.head(2)
comment label comment_cleaned seq_comment
0 Moderate performance, works as intended. 1 moderate performance, work intended. [71, 54, 14, 120]
1 The product is just okay, nothing special. 1 product okay, nothing special. [2, 46, 29, 67]
# Vocabulary size for the Embedding layer.
# BUG FIX (two related defects):
#   1. `np.max(df.seq_comment.max())` is wrong: Series.max() on a column
#      of lists returns the *lexicographically* largest list, not the
#      list containing the largest token id, so the result can undercount
#      the vocabulary.
#   2. `vocab_size` was hard-coded to 150 and fed to the Embedding layer;
#      any token id >= 150 would be an out-of-range embedding lookup.
# Derive the size from the fitted tokenizer instead: word_index is
# 1-based, and index 0 is reserved for padding, hence +2.
vocab_length = len(tokenizer.word_index) + 2
max_seq_length = df['seq_comment'].map(len).max()

vocab_size = vocab_length   # was a hard-coded 150 — see BUG FIX above
embedding_dim = 25
max_length = 50             # NOTE(review): unused — padding uses max_seq_length
trunc_type = 'post'
padding_type = 'post'
oov_tok = "<OOV>"           # NOTE(review): must be passed to Tokenizer(...) to take effect

X = df['seq_comment']
y = df['label']
from sklearn.model_selection import train_test_split

# Hold out 25% of the data for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42
)

# Pad/truncate every sequence (at the end, per the 'post' settings) to
# the longest training length so the network sees fixed-width input.
import numpy as np

training_padded = np.array(
    pad_sequences(X_train, maxlen=max_seq_length,
                  padding=padding_type, truncating=trunc_type)
)
testing_padded = np.array(
    pad_sequences(X_test, maxlen=max_seq_length,
                  padding=padding_type, truncating=trunc_type)
)
y_train = np.array(y_train)
y_test = np.array(y_test)
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Bidirectional-LSTM classifier over the padded token sequences.
ann = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(128, return_sequences=False)),
    tf.keras.layers.Dense(units=128, activation='relu'),
    tf.keras.layers.Dense(units=64, activation='relu'),
    # 3 output units + sparse_categorical_crossentropy => integer labels in {0, 1, 2}.
    tf.keras.layers.Dense(units=3, activation='softmax'),
])
ann.compile(optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])

# IMPROVEMENT: stop once validation loss stops improving instead of
# always burning 100 epochs — the original run log shows the model at
# 100% accuracy by epoch 2, so ~98 epochs were wasted.
# NOTE(review): the test split doubles as validation data here, so the
# reported val_accuracy is not an unbiased estimate; the perfect score
# from epoch 1 also suggests the dataset is trivially separable or leaks
# the label — worth auditing the CSV.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=3, restore_best_weights=True
)
ann.fit(
    training_padded, y_train,
    batch_size=50, epochs=100,
    validation_data=(testing_padded, y_test),
    callbacks=[early_stop],
)
Epoch 1/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 11s 11ms/step - accuracy: 0.8372 - loss: 0.3448 - val_accuracy: 1.0000 - val_loss: 2.9980e-05
Epoch 2/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 8ms/step - accuracy: 1.0000 - loss: 2.4663e-05 - val_accuracy: 1.0000 - val_loss: 7.1422e-06
Epoch 3/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 9ms/step - accuracy: 1.0000 - loss: 6.6008e-06 - val_accuracy: 1.0000 - val_loss: 2.9599e-06
Epoch 4/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 1.0000 - loss: 2.6961e-06 - val_accuracy: 1.0000 - val_loss: 1.5824e-06
Epoch 5/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 8ms/step - accuracy: 1.0000 - loss: 1.6045e-06 - val_accuracy: 1.0000 - val_loss: 9.6320e-07
Epoch 6/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 8ms/step - accuracy: 1.0000 - loss: 9.1953e-07 - val_accuracy: 1.0000 - val_loss: 6.4053e-07
Epoch 7/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 8ms/step - accuracy: 1.0000 - loss: 6.6269e-07 - val_accuracy: 1.0000 - val_loss: 4.4095e-07
Epoch 8/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 8ms/step - accuracy: 1.0000 - loss: 4.7165e-07 - val_accuracy: 1.0000 - val_loss: 3.2632e-07
Epoch 9/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 8ms/step - accuracy: 1.0000 - loss: 3.4013e-07 - val_accuracy: 1.0000 - val_loss: 2.4450e-07
Epoch 10/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 7ms/step - accuracy: 1.0000 - loss: 2.7341e-07 - val_accuracy: 1.0000 - val_loss: 1.8785e-07
Epoch 11/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 8ms/step - accuracy: 1.0000 - loss: 2.1474e-07 - val_accuracy: 1.0000 - val_loss: 1.4286e-07
Epoch 12/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 8ms/step - accuracy: 1.0000 - loss: 1.6179e-07 - val_accuracy: 1.0000 - val_loss: 1.1644e-07
Epoch 13/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 7ms/step - accuracy: 1.0000 - loss: 1.2753e-07 - val_accuracy: 1.0000 - val_loss: 9.1839e-08
Epoch 14/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 1.0660e-07 - val_accuracy: 1.0000 - val_loss: 7.7105e-08
Epoch 15/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 8.5175e-08 - val_accuracy: 1.0000 - val_loss: 6.2227e-08
Epoch 16/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 6.8174e-08 - val_accuracy: 1.0000 - val_loss: 5.1761e-08
Epoch 17/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 5.9180e-08 - val_accuracy: 1.0000 - val_loss: 4.0817e-08
Epoch 18/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 8ms/step - accuracy: 1.0000 - loss: 4.6687e-08 - val_accuracy: 1.0000 - val_loss: 3.4928e-08
Epoch 19/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 8ms/step - accuracy: 1.0000 - loss: 3.9397e-08 - val_accuracy: 1.0000 - val_loss: 2.6560e-08
Epoch 20/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 3.1368e-08 - val_accuracy: 1.0000 - val_loss: 2.2221e-08
Epoch 21/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 8ms/step - accuracy: 1.0000 - loss: 2.4923e-08 - val_accuracy: 1.0000 - val_loss: 1.9097e-08
Epoch 22/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 10ms/step - accuracy: 1.0000 - loss: 2.1103e-08 - val_accuracy: 1.0000 - val_loss: 1.7500e-08
Epoch 23/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 10ms/step - accuracy: 1.0000 - loss: 1.9780e-08 - val_accuracy: 1.0000 - val_loss: 1.4901e-08
Epoch 24/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 1.7026e-08 - val_accuracy: 1.0000 - val_loss: 9.5844e-09
Epoch 25/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 1.1108e-08 - val_accuracy: 1.0000 - val_loss: 6.7949e-09
Epoch 26/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 7.8710e-09 - val_accuracy: 1.0000 - val_loss: 5.7936e-09
Epoch 27/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 7.7877e-09 - val_accuracy: 1.0000 - val_loss: 5.7936e-09
Epoch 28/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 6.0513e-09 - val_accuracy: 1.0000 - val_loss: 4.1962e-09
Epoch 29/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 8ms/step - accuracy: 1.0000 - loss: 4.9249e-09 - val_accuracy: 1.0000 - val_loss: 4.1962e-09
Epoch 30/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 10ms/step - accuracy: 1.0000 - loss: 4.9642e-09 - val_accuracy: 1.0000 - val_loss: 4.1962e-09
Epoch 31/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 5.0774e-09 - val_accuracy: 1.0000 - val_loss: 2.5988e-09
Epoch 32/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 3.1871e-09 - val_accuracy: 1.0000 - val_loss: 2.5988e-09
Epoch 33/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 10ms/step - accuracy: 1.0000 - loss: 2.5585e-09 - val_accuracy: 1.0000 - val_loss: 1.5974e-09
Epoch 34/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 2.3927e-09 - val_accuracy: 1.0000 - val_loss: 1.5974e-09
Epoch 35/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 2.2414e-09 - val_accuracy: 1.0000 - val_loss: 1.5974e-09
Epoch 36/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 5s 9ms/step - accuracy: 1.0000 - loss: 2.0437e-09 - val_accuracy: 1.0000 - val_loss: 1.5974e-09
Epoch 37/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 1.0000 - loss: 2.1289e-09 - val_accuracy: 1.0000 - val_loss: 1.5974e-09
Epoch 38/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 8ms/step - accuracy: 1.0000 - loss: 1.8886e-09 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 39/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 9ms/step - accuracy: 1.0000 - loss: 4.3289e-10 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 40/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 2.0748e-10 - val_accuracy: 1.0000 - val_loss: 1.5974e-09
Epoch 41/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 8ms/step - accuracy: 1.0000 - loss: 1.9574e-10 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 42/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 10ms/step - accuracy: 1.0000 - loss: 8.6450e-11 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 43/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 1.0214e-10 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 44/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 8.9041e-11 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 45/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 1.4268e-11 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 46/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 47/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 48/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 9ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 49/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 5s 9ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 50/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 2s 8ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 51/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 10ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 52/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 53/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 54/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 55/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 56/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 9ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 57/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 58/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 59/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 60/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 10ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 61/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 62/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 63/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 64/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 65/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 66/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 67/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 68/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 69/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 9ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 70/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 71/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 72/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 73/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 74/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 75/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 76/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 77/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 78/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 79/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 80/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 81/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 82/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 83/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 84/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 85/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 86/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 87/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 88/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 3s 11ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 89/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 90/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 91/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 92/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 93/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 9ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 94/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 95/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 12ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 96/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 97/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 98/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 9ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 99/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 100/100
300/300 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 1.0000 - loss: 0.0000e+00 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
<keras.src.callbacks.history.History at 0x1bd90589a10>
# Report held-out performance of the trained network.
eval_loss, eval_accuracy = ann.evaluate(testing_padded, y_test)
print("\nBest Model Validation Accuracy:", eval_accuracy)
print("\nBest Model Validation Loss:", eval_loss)
157/157 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 1.0000 - loss: 0.0000e+00

Best Model Validation Accuracy: 1.0

Best Model Validation Loss: 0.0
# --- Inference on a new, unseen review --------------------------------
test = ['I like the product, it is excellent, but it has some issue, it may creat problem sometime']

# Reuse the SAME preprocessing pipeline as training: the existing
# tokenization_review function and the already-fitted tokenizer.
# (The original cell redefined tokenization_review verbatim — redundant —
# and passed the whole list, so str(list) artifacts went through the regex.)
test_input = tokenization_review(test[0])

# BUG FIX: do NOT call tokenizer.fit_on_texts on inference text. Refitting
# leaks test data into the vocabulary and assigns brand-new word indices
# that lie outside the embedding layer's trained vocabulary. Only
# transform with the tokenizer fitted on the training corpus.
text_seq = tokenizer.texts_to_sequences([test_input])

# BUG FIX: pad exactly as during training (same maxlen and 'post'
# settings) — the original fed the raw, unpadded sequence, so the input
# length distribution differed from everything the network was trained on.
test_padded = pad_sequences(text_seq, maxlen=max_seq_length,
                            padding=padding_type, truncating=trunc_type)

predicted_probs = ann.predict(test_padded)
predicted_labels = np.argmax(predicted_probs, axis=1)  # class index per sample
print(predicted_labels)